In [1]:
import numpy as np
from sklearn import svm, cross_validation
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split
import h5py
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import sys
sys.path.append("../")
In [2]:
# Analysis size: number of samples to classify and samples per letter.
Ndata = 10000
Nside = 30

# HDF5 database holding the pre-computed nexa code vectors.
file_location = '../results_database/text_wall_street_columns_30.hdf5'

# Letter sequence used to build the classification targets.
text_directory = '../data/wall_street_letters_30.npy'
letters_sequence = np.load(text_directory)
Nletters = len(letters_sequence)
symbols = set(letters_sequence)

# Each letter spans Nside consecutive samples, so sample i is labeled
# with the letter at position i // Nside (same-letter prediction).
targets = np.array([letters_sequence[sample // Nside] for sample in range(Ndata)])
print(list(enumerate(targets[0:40])))
In [3]:
# Calculate the predictions
# Accuracy accumulators (in %) for the two nexa variants; they are
# filled by the sweep loop in the next cell.
scores_mixed = []
scores_indp = []
# Grid of time-cluster counts to sweep: 10, 15, ..., 50.
Ntime_clusters_set = np.arange(10, 55, 5)
# Nexa parameters
max_lag = 10
Nembedding = 3
# NOTE(review): the number of spatial clusters appears to be tied to the
# maximum lag by how the database was generated -- confirm.
Nspatial_clusters = max_lag
In [4]:
# Sweep the number of time clusters and score an LDA classifier on the
# softmax code vectors of each nexa run ('/test' = mixed, '/indep' =
# independent).  The HDF5 file is opened once, read-only, and closed
# deterministically -- the original re-opened it twice per iteration and
# never closed it.
with h5py.File(file_location, 'r') as f:
    for Ntime_clusters in Ntime_clusters_set:
        print(Ntime_clusters)
        # Same scoring procedure for both runs; only the group name and
        # the destination list differ.
        for run_name, scores in (('/test', scores_mixed), ('/indep', scores_indp)):
            parameters_string = '/{0}-{1}-{2}'.format(Nspatial_clusters,
                                                      Ntime_clusters,
                                                      Nembedding)
            nexa = f[run_name + parameters_string]
            code_vectors_softmax = np.array(nexa['code-vectors-softmax'])
            # Classify: one code vector per sample, letter identity as label.
            X = code_vectors_softmax[:Ndata]
            y = targets
            # TODO(review): no random_state, so the 90/10 split (and the
            # reported accuracy) varies between runs.
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10)
            clf_linear = LinearDiscriminantAnalysis()
            clf_linear.fit(X_train, y_train)
            score = clf_linear.score(X_test, y_test) * 100.0
            scores.append(score)
In [5]:
# Accuracy of both nexa variants as a function of the number of time
# clusters (same-letter prediction task).
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
ax.plot(Ntime_clusters_set, scores_indp, 'o-', label='independent', lw=2, markersize=10)
ax.plot(Ntime_clusters_set, scores_mixed, 'o-', label='mixed', lw=2, markersize=10)
ax.set_ylim(0, 105)
ax.set_ylabel('Accuracy (%)')
# The x data is Ntime_clusters_set -- the original label said
# "Ndata clusters", which mislabeled the axis.
ax.set_xlabel('Ntime clusters')
ax.set_title('Accuracy vs Ntime_clusters (Same letter)')
ax.legend();
Out[5]:
In [6]:
# Analysis size: number of samples to classify and samples per letter.
Ndata = 10000
Nside = 30

# HDF5 database holding the pre-computed nexa code vectors.
file_location = '../results_database/text_wall_street_columns_30.hdf5'

# Letter sequence used to build the classification targets.
text_directory = '../data/wall_street_letters_30.npy'
letters_sequence = np.load(text_directory)
Nletters = len(letters_sequence)
symbols = set(letters_sequence)

# Next-letter prediction: sample i is labeled with the letter that
# FOLLOWS the one it belongs to, i.e. position (i // Nside) + 1.
targets = np.array([letters_sequence[(sample // Nside) + 1] for sample in range(Ndata)])
print(list(enumerate(targets[0:40])))
In [7]:
# Calculate the predictions
# Accuracy accumulators (in %) for the two nexa variants; they are
# filled by the sweep loop in the next cell.
scores_mixed = []
scores_indp = []
# Grid of time-cluster counts to sweep: 10, 15, ..., 50.
Ntime_clusters_set = np.arange(10, 55, 5)
# Nexa parameters
max_lag = 10
Nembedding = 3
# NOTE(review): the number of spatial clusters appears to be tied to the
# maximum lag by how the database was generated -- confirm.
Nspatial_clusters = max_lag
In [8]:
# Sweep the number of time clusters and score an LDA classifier on the
# softmax code vectors of each nexa run ('/test' = mixed, '/indep' =
# independent), this time against the next-letter targets.  The HDF5
# file is opened once, read-only, and closed deterministically -- the
# original re-opened it twice per iteration and never closed it.
with h5py.File(file_location, 'r') as f:
    for Ntime_clusters in Ntime_clusters_set:
        print(Ntime_clusters)
        # Same scoring procedure for both runs; only the group name and
        # the destination list differ.
        for run_name, scores in (('/test', scores_mixed), ('/indep', scores_indp)):
            parameters_string = '/{0}-{1}-{2}'.format(Nspatial_clusters,
                                                      Ntime_clusters,
                                                      Nembedding)
            nexa = f[run_name + parameters_string]
            code_vectors_softmax = np.array(nexa['code-vectors-softmax'])
            # Classify: one code vector per sample, next letter as label.
            X = code_vectors_softmax[:Ndata]
            y = targets
            # TODO(review): no random_state, so the 90/10 split (and the
            # reported accuracy) varies between runs.
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10)
            clf_linear = LinearDiscriminantAnalysis()
            clf_linear.fit(X_train, y_train)
            score = clf_linear.score(X_test, y_test) * 100.0
            scores.append(score)
In [9]:
# Accuracy of both nexa variants as a function of the number of time
# clusters (next-letter prediction task).
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
ax.plot(Ntime_clusters_set, scores_indp, 'o-', label='independent', lw=2, markersize=10)
ax.plot(Ntime_clusters_set, scores_mixed, 'o-', label='mixed', lw=2, markersize=10)
ax.set_ylim(0, 105)
ax.set_ylabel('Accuracy (%)')
# The x data is Ntime_clusters_set -- the original label said
# "Ndata Clusters", which mislabeled the axis.
ax.set_xlabel('Ntime clusters')
ax.set_title('Accuracy vs Ntime_clusters (Next letter)')
ax.legend();
Out[9]:
In [ ]: